@@ ... @@
 void arch_do_createdomain(struct vcpu *v)
 {
     struct domain *d = v->domain;
+    l1_pgentry_t gdt_l1e;
     int vcpuid;

     if ( is_idle_task(d) )
@@ ... @@
      * GDT, and the old VCPU# is invalid in the new domain, we would otherwise
      * try to load CS from an invalid table.
      */
+    gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
-    {
         d->arch.mm_perdomain_pt[
-            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] =
-            l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
-    }
+            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] = gdt_l1e;

     v->arch.guest_vtable = __linear_l2_table;
     v->arch.shadow_vtable = __shadow_linear_l2_table;
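
The change in arch_do_createdomain() is a straightforward loop-invariant hoist: l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR) does not depend on vcpuid, so it is computed once into gdt_l1e, and the loop body collapses to a single assignment (which also lets the braces go). Below is a minimal standalone sketch of the same pattern; the constant values, the plain-array model of mm_perdomain_pt, and the 0xdeadbeef stand-in for the real page-table entry are all made up for illustration, not taken from Xen's headers.

#include <stdio.h>

/* Stand-in constants with made-up values; the real definitions live in
 * Xen's arch headers, not in this sketch. */
#define MAX_VIRT_CPUS           4
#define PDPT_VCPU_SHIFT         5    /* hypothetical: 32 l1 slots per VCPU */
#define FIRST_RESERVED_GDT_PAGE 14   /* hypothetical slot offset */

typedef unsigned long l1_pgentry_t; /* stand-in for Xen's real type */

static l1_pgentry_t mm_perdomain_pt[MAX_VIRT_CPUS << PDPT_VCPU_SHIFT];

int main(void)
{
    /* Computed once outside the loop: the entry is identical for every
     * VCPU. 0xdeadbeef stands in for l1e_from_page(virt_to_page(gdt_table),
     * PAGE_HYPERVISOR). */
    l1_pgentry_t gdt_l1e = 0xdeadbeefUL;
    int vcpuid;

    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
        mm_perdomain_pt[
            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] = gdt_l1e;

    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
        printf("vcpu %d: entry written at slot %d\n", vcpuid,
               (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE);

    return 0;
}

Besides shaving a redundant computation per VCPU at domain creation, the hoist makes it obvious at a glance that every VCPU's reserved GDT slot receives the identical entry.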
@@ ... @@
 static void __init start_of_day(void)
 {
     int i;
-    unsigned long vgdt;
+    unsigned long vgdt, gdt_pfn;

     early_cpu_init();
@@ ... @@
      * noted in arch_do_createdomain(), we must map for every possible VCPU#.
      */
     vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
+    gdt_pfn = virt_to_phys(gdt_table) >> PAGE_SHIFT;
     for ( i = 0; i < MAX_VIRT_CPUS; i++ )
     {
-        map_pages_to_xen(
-            vgdt, virt_to_phys(gdt_table) >> PAGE_SHIFT, 1, PAGE_HYPERVISOR);
+        map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR);
         vgdt += 1 << PDPT_VCPU_VA_SHIFT;
     }
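
The start_of_day() hunk is the same hoist applied to the idle page table: virt_to_phys(gdt_table) >> PAGE_SHIFT is invariant across the loop, so it is read once into gdt_pfn, while only the target virtual address vgdt advances by one per-VCPU stride (1 << PDPT_VCPU_VA_SHIFT) each iteration. The standalone sketch below walks the same address arithmetic and prints the per-VCPU virtual addresses that would be handed to map_pages_to_xen(); the base address, shift, offset, and pfn are invented for illustration and do not match Xen's real constants.

#include <stdio.h>

/* Hypothetical values; the real definitions live in Xen's arch headers. */
#define MAX_VIRT_CPUS           4
#define PDPT_VCPU_VA_SHIFT      17              /* hypothetical VA span per VCPU */
#define GDT_VIRT_BASE           0xFC000000UL    /* stands in for GDT_VIRT_START() */
#define FIRST_RESERVED_GDT_BYTE (14UL << 12)    /* hypothetical byte offset */

int main(void)
{
    /* The GDT's page frame number is loop-invariant, so it is computed once;
     * 0x1234 stands in for virt_to_phys(gdt_table) >> PAGE_SHIFT. */
    unsigned long gdt_pfn = 0x1234UL;
    unsigned long vgdt = GDT_VIRT_BASE + FIRST_RESERVED_GDT_BYTE;
    int i;

    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    {
        /* Stands in for map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR). */
        printf("vcpu %d: map pfn %#lx at va %#lx\n", i, gdt_pfn, vgdt);
        vgdt += 1UL << PDPT_VCPU_VA_SHIFT;
    }

    return 0;
}

The stride keeps each VCPU's reserved GDT range at a fixed, easily computed offset, which is what lets both loops index purely by VCPU number.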